More simplification and cleanup of the ac_timer interface.
Signed-off-by: Keir Fraser <keir@xensource.com>
}
/* callback function when vtm_timer expires */
-static void vtm_timer_fn(unsigned long data)
+static void vtm_timer_fn(void *data)
{
vtime_t *vtm;
- VCPU *vcpu = (VCPU*)data;
+ VCPU *vcpu = data;
u64 cur_itc,vitm;
UINT64 vec;
//fire_itc2 = cur_itc;
//fire_itm2 = vitm;
update_last_itc(vtm,cur_itc); // pseudo read to update vITC
- vtm->timer_hooked = 0;
}
void vtm_init(VCPU *vcpu)
itc_freq = local_cpu_data->itc_freq;
vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
vtm->cfg_min_grun=itc_freq*MIN_GUEST_RUNNING_TIME/1000;
- /* set up the actimer */
- init_ac_timer(&(vtm->vtm_timer));
- vtm->timer_hooked = 0;
- vtm->vtm_timer.cpu = 0; /* Init value for SMP case */
- vtm->vtm_timer.data = (unsigned long)vcpu;
- vtm->vtm_timer.function = vtm_timer_fn;
+ init_ac_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, 0);
vtm_reset(vcpu);
}
vtm=&(vcpu->arch.arch_vmx.vtm);
local_irq_save(spsr);
itv = VPD_CR(vcpu, itv);
- if ( ITV_IRQ_MASK(itv) && vtm->timer_hooked ) {
- rem_ac_timer(&(vtm->vtm_timer));
- vtm->timer_hooked = 0;
- }
+ if ( ITV_IRQ_MASK(itv) )
+ rem_ac_timer(&vtm->vtm_timer);
vtm_interruption_update(vcpu, vtm);
local_irq_restore(spsr);
}
if ( diff_last >= 0 ) {
// interrupt already fired.
- if ( vtm->timer_hooked ) {
- rem_ac_timer(&(vtm->vtm_timer));
- vtm->timer_hooked = 0;
- }
+ rem_ac_timer(&vtm->vtm_timer);
}
else if ( diff_now >= 0 ) {
// ITV is fired.
vmx_vcpu_pend_interrupt(vcpu, vitv&0xff);
}
/* Both last_itc & cur_itc < itm, wait for fire condition */
- else if ( vtm->timer_hooked ) {
- expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
- set_ac_timer(&vtm->vtm_timer, expires);
- }
else {
expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
- vtm->vtm_timer.cpu = vcpu->processor;
set_ac_timer(&vtm->vtm_timer, expires);
- vtm->timer_hooked = 1;
}
local_irq_restore(spsr);
}
*/
void vtm_domain_out(VCPU *vcpu)
{
- vtime_t *vtm;
- uint64_t spsr;
-
- vtm=&(vcpu->arch.arch_vmx.vtm);
- local_irq_save(spsr);
- if ( vtm->timer_hooked ) {
- rem_ac_timer(&(vtm->vtm_timer));
- vtm->timer_hooked = 0;
- }
- local_irq_restore(spsr);
+ rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
/*
vtm_interruption_update(vcpu, vtm);
}
-
-
/*
* Next for vLSapic
*/
}
/* hooks function for the PIT initialization response iopacket */
-static void pit_timer_fn(unsigned long data)
+static void pit_timer_fn(void *data)
{
- struct vmx_virpit_t *vpit = (struct vmx_virpit_t*)data;
+ struct vmx_virpit_t *vpit = data;
- /*set the pending intr bit in shared page, send evtchn notification to myself*/
+ /* Set the pending intr bit, and send evtchn notification to myself. */
if (test_and_set_bit(vpit->vector, vpit->intr_bitmap))
- vpit->pending_intr_nr++; /* if originaly set, then count the pending intr */
+ vpit->pending_intr_nr++; /* bit was already set: count it as a pending intr */
set_ac_timer(&vpit->pit_timer, NOW() + MILLISECS(vpit->period));
}
vpit->intr_bitmap = intr;
/* set up the actimer */
- init_ac_timer(&(vpit->pit_timer));
- vpit->pit_timer.cpu = 0; /*FIXME: change for SMP */
- vpit->pit_timer.data = (unsigned long)vpit;
- vpit->pit_timer.function = pit_timer_fn;
- pit_timer_fn((unsigned long)vpit); /* timer seed */
+ init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, 0);
+ pit_timer_fn(vpit); /* timer seed */
/*restore the state*/
p->state = STATE_IORESP_READY;
-/****************************************************************************
- * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
- * (C) 2002-2003 University of Cambridge
- ****************************************************************************
- *
- * File: ac_timer.c
- * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
- * Keir Fraser (kaf24@cl.cam.ac.uk)
- *
- * Environment: Xen Hypervisor
- * Description: Accurate timer for the Hypervisor
+/******************************************************************************
+ * ac_timer.c
+ *
+ * Copyright (c) 2002-2003 Rolf Neugebauer
+ * Copyright (c) 2002-2005 K A Fraser
*/
#include <xen/config.h>
int cpu = smp_processor_id();
struct ac_timer *t, **heap;
s_time_t now;
- void (*fn)(unsigned long);
+ void (*fn)(void *);
ac_timers[cpu].softirqs++;
if ( (fn = t->function) != NULL )
{
- unsigned long data = t->data;
+ void *data = t->data;
spin_unlock_irq(&ac_timers[cpu].lock);
(*fn)(data);
spin_lock_irq(&ac_timers[cpu].lock);
for ( j = 1; j <= GET_HEAP_SIZE(ac_timers[i].heap); j++ )
{
t = ac_timers[i].heap[j];
- printk (" %d : %p ex=0x%08X%08X %lu\n",
+ printk (" %d : %p ex=0x%08X%08X %p\n",
j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
}
spin_unlock_irqrestore(&ac_timers[i].lock, flags);
/* Warp/unwarp timer functions */
-static void warp_timer_fn(unsigned long pointer)
+static void warp_timer_fn(void *data)
{
- struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
+ struct bvt_dom_info *inf = data;
unsigned int cpu = inf->domain->exec_domain[0]->processor;
spin_lock_irq(&schedule_data[cpu].schedule_lock);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
}
-static void unwarp_timer_fn(unsigned long pointer)
+static void unwarp_timer_fn(void *data)
{
- struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
+ struct bvt_dom_info *inf = data;
unsigned int cpu = inf->domain->exec_domain[0]->processor;
spin_lock_irq(&schedule_data[cpu].schedule_lock);
inf->warp_value = 0;
inf->warpl = MILLISECS(2000);
inf->warpu = MILLISECS(1000);
- /* initialise the timers */
- init_ac_timer(&inf->warp_timer);
- inf->warp_timer.cpu = d->processor;
- inf->warp_timer.data = (unsigned long)inf;
- inf->warp_timer.function = &warp_timer_fn;
- init_ac_timer(&inf->unwarp_timer);
- inf->unwarp_timer.cpu = d->processor;
- inf->unwarp_timer.data = (unsigned long)inf;
- inf->unwarp_timer.function = &unwarp_timer_fn;
+ /* Initialise the warp timers. */
+ init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, d->processor);
+ init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, d->processor);
}
einf->exec_domain = d;
struct list_head runnableq;
struct list_head waitq;
struct list_head extraq[2];
+ s_time_t current_slice_expires;
};
#define EDOM_INFO(d) ((struct sedf_edom_info *)((d)->sched_priv))
d->vcpu_id);
/* Allocate per-CPU context if this is the first domain to be added. */
- if ( schedule_data[d->processor].sched_priv == NULL )
+ if ( unlikely(schedule_data[d->processor].sched_priv == NULL) )
{
schedule_data[d->processor].sched_priv =
xmalloc(struct sedf_cpu_info);
BUG_ON(schedule_data[d->processor].sched_priv == NULL);
+ memset(CPU_INFO(d->processor), 0, sizeof(*CPU_INFO(d->processor)));
INIT_LIST_HEAD(WAITQ(d->processor));
INIT_LIST_HEAD(RUNQ(d->processor));
INIT_LIST_HEAD(EXTRAQ(d->processor,EXTRA_PEN_Q));
EDOM_INFO(ret.task)->sched_start_abs = now;
CHECK(ret.time > 0);
ASSERT(sedf_runnable(ret.task));
+ CPU_INFO(cpu)->current_slice_expires = now + ret.time;
return ret;
}
other_inf = EDOM_INFO(other);
/*check whether we need to make an earlier sched-decision*/
- if ((PERIOD_BEGIN(other_inf) <
- schedule_data[other->processor].s_timer.expires))
+ if (PERIOD_BEGIN(other_inf) <
+ CPU_INFO(other->processor)->current_slice_expires)
return 1;
/*no timing-based switches need to be taken into account here*/
switch (get_run_type(cur)) {
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */
/* Various timer handlers. */
-static void s_timer_fn(unsigned long unused);
-static void t_timer_fn(unsigned long unused);
-static void dom_timer_fn(unsigned long data);
+static void s_timer_fn(void *unused);
+static void t_timer_fn(void *unused);
+static void dom_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
struct schedule_data schedule_data[NR_CPUS];
struct domain *d = ed->domain;
/* Initialise the per-domain timer. */
- init_ac_timer(&ed->timer);
- ed->timer.cpu = ed->processor;
- ed->timer.data = (unsigned long)ed;
- ed->timer.function = &dom_timer_fn;
+ init_ac_timer(&ed->timer, dom_timer_fn, ed, ed->processor);
if ( is_idle_task(d) )
{
****************************************************************************/
/* The scheduler timer: force a run through the scheduler */
-static void s_timer_fn(unsigned long unused)
+static void s_timer_fn(void *unused)
{
raise_softirq(SCHEDULE_SOFTIRQ);
perfc_incrc(sched_irq);
}
/* Periodic tick timer: send timer event to current domain */
-static void t_timer_fn(unsigned long unused)
+static void t_timer_fn(void *unused)
{
struct exec_domain *ed = current;
unsigned int cpu = ed->processor;
}
/* Domain timer function, sends a virtual timer interrupt to domain */
-static void dom_timer_fn(unsigned long data)
+static void dom_timer_fn(void *data)
{
- struct exec_domain *ed = (struct exec_domain *)data;
+ struct exec_domain *ed = data;
update_dom_time(ed);
send_guest_virq(ed, VIRQ_TIMER);
for ( i = 0; i < NR_CPUS; i++ )
{
spin_lock_init(&schedule_data[i].schedule_lock);
-
- init_ac_timer(&schedule_data[i].s_timer);
- schedule_data[i].s_timer.cpu = i;
- schedule_data[i].s_timer.data = 2;
- schedule_data[i].s_timer.function = &s_timer_fn;
-
- init_ac_timer(&t_timer[i]);
- t_timer[i].cpu = i;
- t_timer[i].data = 3;
- t_timer[i].function = &t_timer_fn;
+ init_ac_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
+ init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
}
schedule_data[0].curr = idle_task[0];
uint64_t cfg_min_grun; // min guest running time since last jump
// uint64_t latest_read_itc; // latest guest read ITC
struct ac_timer vtm_timer;
- int timer_hooked; // vtm_timer is hooked
// int triggered;
-/****************************************************************************
- * (C) 2002 - Rolf Neugebauer - Intel Research Cambridge
- ****************************************************************************
- *
- * File: ac_timer.h
- * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
- * Changes:
- *
- * Date: Nov 2002
+/******************************************************************************
+ * ac_timer.h
*
- * Environment: Xen Hypervisor
- * Description: Accurate timer for the Hypervisor
- *
- ****************************************************************************
- * $Id: h-insert.h,v 1.4 2002/11/08 16:03:55 rn Exp $
- ****************************************************************************
+ * Copyright (c) 2002-2003 Rolf Neugebauer
+ * Copyright (c) 2002-2005 K A Fraser
*/
#ifndef _AC_TIMER_H_
#include <xen/time.h>
struct ac_timer {
- /*
- * PUBLIC FIELDS
- */
/* System time expiry value (nanoseconds since boot). */
- s_time_t expires;
+ s_time_t expires;
/* CPU on which this timer will be installed and executed. */
- unsigned int cpu;
+ unsigned int cpu;
/* On expiry, '(*function)(data)' will be executed in softirq context. */
- unsigned long data;
- void (*function)(unsigned long);
-
- /*
- * PRIVATE FIELDS
- */
- unsigned int heap_offset;
+ void (*function)(void *);
+ void *data;
+ /* Timer-heap offset. */
+ unsigned int heap_offset;
};
/*
- * This function can be called for any CPU from any CPU in any context.
- * It initialises the private fields of the ac_timer structure.
+ * All functions below can be called for any CPU from any CPU in any context.
*/
-static __inline__ void init_ac_timer(struct ac_timer *timer)
+
+/* Returns TRUE if the given timer is on a timer list. */
+static __inline__ int active_ac_timer(struct ac_timer *timer)
{
- timer->heap_offset = 0;
+ return (timer->heap_offset != 0);
}
/*
- * This function can be called for any CPU from any CPU in any context.
- * It returns TRUE if the given timer is on a timer list.
+ * Initialise the static fields of an ac_timer structure. This may be called
+ * multiple times to reinitialise a single (inactive) timer.
*/
-static __inline__ int active_ac_timer(struct ac_timer *timer)
+static __inline__ void init_ac_timer(
+ struct ac_timer *timer,
+ void (*function)(void *),
+ void *data,
+ unsigned int cpu)
{
- return (timer->heap_offset != 0);
+ memset(timer, 0, sizeof(*timer));
+ timer->function = function;
+ timer->data = data;
+ timer->cpu = cpu;
}
/*
- * This function can be called for any CPU from any CPU in any context, BUT:
- * -- The private fields must have been initialised (ac_timer_init).
- * -- All public fields must be initialised.
+ * Set the expiry time and activate a timer (which must previously have been
+ * initialised by init_ac_timer).
*/
extern void set_ac_timer(struct ac_timer *timer, s_time_t expires);
/*
- * This function can be called for any CPU from any CPU in any context, BUT:
- * -- The private fields must have been initialised (ac_timer_init).
- * -- All public fields must be initialised.
- * -- The timer must currently be on a timer list.
+ * Deactivate a timer (which must previously have been initialised by
+ * init_ac_timer). This function has no effect if the timer is not currently
+ * active.
*/
extern void rem_ac_timer(struct ac_timer *timer);